@InProceedings{SousaFernVasc:2021:NoSeNe,
               author = "Sousa, Eduardo Vera and Fernandes, Leandro A. F. and Vasconcelos, 
                         Cristina Nader",
          affiliation = "{Universidade Federal Fluminense} and {Universidade Federal 
                          Fluminense} and {Universidade Federal Fluminense}",
                title = "ConformalLayers: A non-linear sequential neural network with 
                         associative layers",
            booktitle = "Proceedings...",
                 year = "2021",
               editor = "Paiva, Afonso and Menotti, David and Baranoski, Gladimir V. G. and 
                         Proen{\c{c}}a, Hugo Pedro and Junior, Antonio Lopes Apolinario 
                         and Papa, Jo{\~a}o Paulo and Pagliosa, Paulo and dos Santos, 
                         Thiago Oliveira and e S{\'a}, Asla Medeiros and da Silveira, 
                         Thiago Lopes Trugillo and Brazil, Emilio Vital and Ponti, Moacir 
                         A. and Fernandes, Leandro A. F. and Avila, Sandra",
         organization = "Conference on Graphics, Patterns and Images, 34. (SIBGRAPI)",
            publisher = "IEEE Computer Society",
              address = "Los Alamitos",
             keywords = "convolutional neural network, non-linear activation, 
                         associativity.",
             abstract = "Convolutional Neural Networks (CNNs) have been widely applied. But 
                         as the CNNs grow, the number of arithmetic operations and memory 
                          footprint also increase. Furthermore, typical non-linear 
                         activation functions do not allow associativity of the operations 
                         encoded by consecutive layers, preventing the simplification of 
                         intermediate steps by combining them. We present a new activation 
                         function that allows associativity between sequential layers of 
                         CNNs. Even though our activation function is non-linear, it can be 
                         represented by a sequence of linear operations in the conformal 
                         model for Euclidean geometry. In this domain, operations like, but 
                         not limited to, convolution, average pooling, and dropout remain 
                         linear. We take advantage of associativity to combine all the 
                         {"}conformal layers{"} and make the cost of inference constant 
                         regardless of the depth of the network.",
  conference-location = "Gramado, RS, Brazil (virtual)",
      conference-year = "18-22 Oct. 2021",
                  doi = "10.1109/SIBGRAPI54419.2021.00059",
             language = "en",
                  ibi = "8JMKD3MGPEW34M/45CGCM8",
                  url = "http://urlib.net/ibi/8JMKD3MGPEW34M/45CGCM8",
           targetfile = "Main.pdf",
        urlaccessdate = "2024, May 06"
}
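
The layer combination described in the abstract rests on the associativity of linear maps: once every operation between the input and the output is linear, the whole sequence can be folded into a single map ahead of time. The snippet below is a minimal NumPy sketch of that idea only; it is not the authors' ConformalLayers implementation (which works in the conformal model of Euclidean geometry rather than on plain matrices), and the depth, dimensions, and the helper name forward_layer_by_layer are made up for illustration.

    # Sketch: associativity lets a stack of linear layers collapse into one matrix,
    # so the per-input inference cost no longer depends on the depth of the stack.
    import numpy as np

    rng = np.random.default_rng(0)

    depth, dim = 10, 64  # hypothetical network depth and feature dimension
    layers = [rng.standard_normal((dim, dim)) / np.sqrt(dim) for _ in range(depth)]

    def forward_layer_by_layer(x, layers):
        """Apply the layers one after another: cost grows with depth."""
        for W in layers:
            x = W @ x
        return x

    # Associativity: W_k (... (W_2 (W_1 x))) == (W_k ... W_2 W_1) x,
    # so all layers can be pre-combined into a single matrix.
    combined = layers[0]
    for W in layers[1:]:
        combined = W @ combined

    x = rng.standard_normal(dim)
    assert np.allclose(forward_layer_by_layer(x, layers), combined @ x)

Applying "combined" to an input costs one matrix-vector product regardless of how many layers were folded into it, which is the constant-cost inference property the abstract claims for sequences of conformal layers.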

